x86 hvm: Make a couple of hypercall state flags per-vcpu
author     Keir Fraser <keir@xen.org>
Tue, 16 Nov 2010 12:42:35 +0000 (12:42 +0000)
committer  Keir Fraser <keir@xen.org>
Tue, 16 Nov 2010 12:42:35 +0000 (12:42 +0000)
The per-CPU flags hc_preempted and hvm_64bit_hcall become per-vCPU fields
(hcall_preempted, hcall_64bit) in struct hvm_vcpu. This is a prerequisite
for allowing guest descheduling within a hypercall.

Signed-off-by: Keir Fraser <keir@xen.org>
xen/arch/x86/domain.c
xen/arch/x86/hvm/hvm.c
xen/include/asm-x86/hvm/guest_access.h
xen/include/asm-x86/hvm/vcpu.h
xen/include/asm-x86/hypercall.h
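
The patch replaces two per-physical-CPU variables (hc_preempted in domain.c,
hvm_64bit_hcall in hvm.c) with per-vCPU fields in struct hvm_vcpu, so the
state stays with the vCPU even if it is descheduled mid-hypercall and later
resumed, possibly on a different physical CPU. A minimal sketch of the access
pattern before and after (field and macro names are those used in the patch;
the snippet is illustrative, not a standalone program):

    /* Before: state keyed on the physical CPU currently running the vCPU. */
    DEFINE_PER_CPU(char, hc_preempted);
    this_cpu(hc_preempted) = 1;   /* stale/clobbered if the vCPU moves pCPUs */

    /* After: state embedded in the vCPU itself. */
    struct hvm_vcpu {
        /* ... */
        bool_t hcall_preempted;
        bool_t hcall_64bit;
        /* ... */
    };
    current->arch.hvm_vcpu.hcall_preempted = 1;  /* travels with the vCPU */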

diff --git a/xen/arch/x86/domain.c b/xen/arch/x86/domain.c
index d50cbac751106892f94ad248e4cfce581ead1b8a..4c7440c0e71edfe878c47e66026d231c6c8c7dbd 100644
@@ -1553,8 +1553,6 @@ void sync_vcpu_execstate(struct vcpu *v)
     __arg;                                                                  \
 })
 
-DEFINE_PER_CPU(char, hc_preempted);
-
 unsigned long hypercall_create_continuation(
     unsigned int op, const char *format, ...)
 {
@@ -1583,12 +1581,12 @@ unsigned long hypercall_create_continuation(
     {
         regs       = guest_cpu_user_regs();
         regs->eax  = op;
-        /*
-         * For PV guest, we update EIP to re-execute 'syscall' / 'int 0x82';
-         * HVM does not need this since 'vmcall' / 'vmmcall' is fault-like.
-         */
+
+        /* Ensure the hypercall trap instruction is re-executed. */
         if ( !is_hvm_vcpu(current) )
             regs->eip -= 2;  /* re-execute 'syscall' / 'int 0x82' */
+        else
+            current->arch.hvm_vcpu.hcall_preempted = 1;
 
 #ifdef __x86_64__
         if ( !is_hvm_vcpu(current) ?
@@ -1629,8 +1627,6 @@ unsigned long hypercall_create_continuation(
                 }
             }
         }
-
-        this_cpu(hc_preempted) = 1;
     }
 
     va_end(args);
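
For context, the usual consumer of hypercall_create_continuation() is a
long-running hypercall handler that checks for pending preemption and
re-issues itself with updated arguments; with this patch, an HVM caller of
such a handler gets hcall_preempted set instead of having its EIP rewound.
A sketch of that pattern follows — only hypercall_preempt_check() and
hypercall_create_continuation() are real symbols; the hypercall number,
handler and helpers are made up for illustration:

    /* Illustrative handler: __HYPERVISOR_example_op, NR_UNITS and
     * example_unit_of_work() are stand-ins, not real Xen symbols. */
    static long do_example_op(XEN_GUEST_HANDLE(void) arg, unsigned long start)
    {
        unsigned long i;

        for ( i = start; i < NR_UNITS; i++ )
        {
            if ( hypercall_preempt_check() )
                /* Arrange for the guest to re-enter this hypercall at unit i.
                 * For HVM callers this now sets hcall_preempted, so the
                 * vmcall/vmmcall instruction is re-executed on resume. */
                return hypercall_create_continuation(
                    __HYPERVISOR_example_op, "hl", arg, i);

            example_unit_of_work(i);
        }

        return 0;
    }
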
diff --git a/xen/arch/x86/hvm/hvm.c b/xen/arch/x86/hvm/hvm.c
index 966ec831d2424b2fc5ca4f63f7d991a7817f99c4..ba0481a13329ca4bf298c69dc7840858fec21558 100644
@@ -2028,16 +2028,13 @@ enum hvm_copy_result hvm_fetch_from_guest_virt_nofault(
                       PFEC_page_present | pfec);
 }
 
-#ifdef __x86_64__
-DEFINE_PER_CPU(bool_t, hvm_64bit_hcall);
-#endif
-
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned int len)
 {
     int rc;
 
 #ifdef __x86_64__
-    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(to, len) )
+    if ( !current->arch.hvm_vcpu.hcall_64bit &&
+         is_compat_arg_xlat_range(to, len) )
     {
         memcpy(to, from, len);
         return 0;
@@ -2054,7 +2051,8 @@ unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len)
     int rc;
 
 #ifdef __x86_64__
-    if ( !this_cpu(hvm_64bit_hcall) && is_compat_arg_xlat_range(from, len) )
+    if ( !current->arch.hvm_vcpu.hcall_64bit &&
+         is_compat_arg_xlat_range(from, len) )
     {
         memcpy(to, from, len);
         return 0;
@@ -2567,7 +2565,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
         return HVM_HCALL_completed;
     }
 
-    this_cpu(hc_preempted) = 0;
+    curr->arch.hvm_vcpu.hcall_preempted = 0;
 
 #ifdef __x86_64__
     if ( mode == 8 )
@@ -2575,13 +2573,13 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
         HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u(%lx, %lx, %lx, %lx, %lx)", eax,
                     regs->rdi, regs->rsi, regs->rdx, regs->r10, regs->r8);
 
-        this_cpu(hvm_64bit_hcall) = 1;
+        curr->arch.hvm_vcpu.hcall_64bit = 1;
         regs->rax = hvm_hypercall64_table[eax](regs->rdi,
                                                regs->rsi,
                                                regs->rdx,
                                                regs->r10,
                                                regs->r8); 
-        this_cpu(hvm_64bit_hcall) = 0;
+        curr->arch.hvm_vcpu.hcall_64bit = 0;
     }
     else
 #endif
@@ -2601,7 +2599,7 @@ int hvm_do_hypercall(struct cpu_user_regs *regs)
     HVM_DBG_LOG(DBG_LEVEL_HCALL, "hcall%u -> %lx",
                 eax, (unsigned long)regs->eax);
 
-    if ( this_cpu(hc_preempted) )
+    if ( curr->arch.hvm_vcpu.hcall_preempted )
         return HVM_HCALL_preempted;
 
     if ( unlikely(curr->domain->arch.hvm_domain.qemu_mapcache_invalidate) &&
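
The HVM_HCALL_preempted return value matters because the caller of
hvm_do_hypercall() — the vmcall/vmmcall VM-exit handler — only advances the
guest instruction pointer when the hypercall actually ran to completion; a
preempted hypercall leaves RIP on the trap instruction so it is re-executed
when the vCPU is rescheduled. A sketch of that call site (helper names are
illustrative, not the exact vmx/svm code):

    /* Illustrative vmcall/vmmcall exit handling; advance_guest_rip() stands
     * in for the real instruction-skip helper. */
    rc = hvm_do_hypercall(regs);
    if ( rc != HVM_HCALL_preempted )
        advance_guest_rip(regs, insn_len);   /* skip the trap instruction */
    /* else: leave RIP untouched; the hypercall re-executes on resume. */
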
diff --git a/xen/include/asm-x86/hvm/guest_access.h b/xen/include/asm-x86/hvm/guest_access.h
index f401ac9d8d02c279b7b4ab19b84a7b7eff81e992..7a89e815365861573e340f2b083460a273141bbc 100644
@@ -1,9 +1,6 @@
 #ifndef __ASM_X86_HVM_GUEST_ACCESS_H__
 #define __ASM_X86_HVM_GUEST_ACCESS_H__
 
-#include <xen/percpu.h>
-DECLARE_PER_CPU(bool_t, hvm_64bit_hcall);
-
 unsigned long copy_to_user_hvm(void *to, const void *from, unsigned len);
 unsigned long copy_from_user_hvm(void *to, const void *from, unsigned len);
 
diff --git a/xen/include/asm-x86/hvm/vcpu.h b/xen/include/asm-x86/hvm/vcpu.h
index 1d72ecfc609bb9c668acb47331f44d0abda567d1..682027fffa49f720be3e45eafd4b2e2544a443c0 100644
@@ -63,6 +63,9 @@ struct hvm_vcpu {
     bool_t              debug_state_latch;
     bool_t              single_step;
 
+    bool_t              hcall_preempted;
+    bool_t              hcall_64bit;
+
     u64                 asid_generation;
     u32                 asid;
 
diff --git a/xen/include/asm-x86/hypercall.h b/xen/include/asm-x86/hypercall.h
index 86377862f154ba198c951eac938c88b9b52c06b1..a6615e895ee84c911be09ec9fd300ee50918c68b 100644
  */
 #define MMU_UPDATE_PREEMPTED          (~(~0U>>1))
 
-/*
- * This gets set to a non-zero value whenever hypercall_create_continuation()
- * is used (outside of multicall context; in multicall context the second call
- * from do_multicall() itself will have this effect). Internal callers of
- * hypercall handlers interested in this condition must clear the flag prior
- * to invoking the respective handler(s).
- */
-DECLARE_PER_CPU(char, hc_preempted);
-
 extern long
 do_event_channel_op_compat(
     XEN_GUEST_HANDLE(evtchn_op_t) uop);
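
The contract described by the removed comment carries over to the new
per-vCPU field: an internal caller that invokes a hypercall handler directly
and cares whether a continuation was created clears the flag first and
inspects it afterwards, exactly as hvm_do_hypercall() does above. A minimal
sketch of that pattern for an HVM vCPU ('handler' and 'arg' are illustrative):

    struct vcpu *curr = current;
    long ret;

    curr->arch.hvm_vcpu.hcall_preempted = 0;   /* clear before dispatch */
    ret = handler(arg);                        /* illustrative handler call */
    if ( curr->arch.hvm_vcpu.hcall_preempted )
        /* The handler created a continuation; 'ret' is not a final result. */
        return ret;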